@InProceedings{Cardenas:2018:MuHuAc,
               author = "Cardenas, Edwin Jonathan Escobedo",
          affiliation = "{Federal University of Ouro Preto}",
                title = "Multimodal Human Action Recognition Based on a Fusion of Dynamic 
                         Images using CNN descriptors",
            booktitle = "Proceedings...",
                 year = "2018",
               editor = "Ross, Arun and Gastal, Eduardo S. L. and Jorge, Joaquim A. and 
                         Queiroz, Ricardo L. de and Minetto, Rodrigo and Sarkar, Sudeep and 
                         Papa, Jo{\~a}o Paulo and Oliveira, Manuel M. and Arbel{\'a}ez, 
                         Pablo and Mery, Domingo and Oliveira, Maria Cristina Ferreira de 
                         and Spina, Thiago Vallin and Mendes, Caroline Mazetto and Costa, 
                         Henrique S{\'e}rgio Gutierrez and Mejail, Marta Estela and Geus, 
                         Klaus de and Scheer, Sergio",
         organization = "Conference on Graphics, Patterns and Images, 31. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "action recognition, dynamic images, RGB-D data, Kinect, CNN",
             abstract = "In this paper, we propose a dynamic-images-based approach for 
                         action recognition. Specifically, we exploit the multimodal 
                         information recorded by a Kinect sensor (RGB-D and skeleton joint 
                         data). We combine several ideas from rank pooling and skeleton 
                         optical spectra to generate dynamic images that summarize an 
                         action sequence into single flow images. We group our dynamic 
                         images into five groups: a dynamic color group (DC), a dynamic 
                         depth group (DD), and three dynamic skeleton groups (DXY, DYZ, 
                         DXZ). As an action is composed of different postures over time, 
                         we generated N different dynamic images capturing the main 
                         postures for each dynamic group. Next, we applied a pre-trained 
                         flow-CNN to extract spatiotemporal features with a max-mean 
                         aggregation. The proposed method was evaluated on a public 
                         benchmark dataset, UTD-MHAD, and achieved state-of-the-art 
                         results.",
  conference-location = "Foz do Igua{\c{c}}u, PR, Brazil",
      conference-year = "29 Oct.-1 Nov. 2018",
                  doi = "10.1109/SIBGRAPI.2018.00019",
                  url = "http://dx.doi.org/10.1109/SIBGRAPI.2018.00019",
             language = "en",
                  ibi = "8JMKD3MGPAW/3RQE3SH",
                  url = "http://urlib.net/ibi/8JMKD3MGPAW/3RQE3SH",
           targetfile = "
                         
                         Multimodal_Human_Action_Recognition_Based_on_a_Fusion_of_Dynamic_Images_using_CNN_descriptors.pdf",
        urlaccessdate = "2024, Apr. 28"
}
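
For readers who want to experiment with the pipeline the abstract describes, below is a minimal Python sketch of its two core operations: collapsing a clip into a dynamic image via approximate rank pooling (the closed-form weights of Bilen et al., CVPR 2016, on which rank-pooling dynamic images are commonly built) and a max-mean aggregation of per-image CNN descriptors. The coefficient formula, the rescaling, and the reading of "max-mean" as an element-wise max averaged with an element-wise mean are assumptions; the paper's exact implementation may differ.

import numpy as np

def dynamic_image(frames):
    """Collapse a (T, H, W, C) clip into one (H, W, C) dynamic image.

    Uses the closed-form approximate rank pooling weights
    alpha_t = 2*(T - t + 1) - (T + 1) * (H_T - H_{t-1}),
    where H_t = sum_{i=1..t} 1/i is the t-th harmonic number.
    """
    T = frames.shape[0]
    # harmonics[t] holds H_t, with H_0 = 0.
    harmonics = np.concatenate([[0.0], np.cumsum(1.0 / np.arange(1, T + 1))])
    t = np.arange(1, T + 1)
    alpha = 2 * (T - t + 1) - (T + 1) * (harmonics[T] - harmonics[t - 1])
    # Weighted sum of frames along the time axis.
    di = np.tensordot(alpha, frames.astype(np.float64), axes=(0, 0))
    # Rescale to [0, 255] so the result can be fed to an image CNN.
    di = 255 * (di - di.min()) / (di.max() - di.min() + 1e-8)
    return di.astype(np.uint8)

def max_mean_aggregate(features):
    """Fuse N per-dynamic-image CNN descriptors of shape (N, D) into one.

    Assumption: "max-mean aggregation" is taken here as the average of
    the element-wise max and the element-wise mean over the N descriptors.
    """
    return 0.5 * (features.max(axis=0) + features.mean(axis=0))

# Example: 32 random RGB frames stand in for a Kinect color clip (DC group).
clip = np.random.randint(0, 256, size=(32, 120, 160, 3), dtype=np.uint8)
di = dynamic_image(clip)            # one dynamic image for the clip
feats = np.random.rand(5, 4096)     # e.g. N = 5 CNN descriptors per group
print(di.shape, max_mean_aggregate(feats).shape)

The same dynamic_image routine would be applied per modality (color, depth, and the three skeleton projections DXY, DYZ, DXZ) to produce the five groups the abstract lists, before feeding each image to the pre-trained flow-CNN.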

